use MWException;
use WANObjectCache;
use Wikimedia\Assert\Assert;
-use Wikimedia\Rdbms\Database;
use Wikimedia\Rdbms\IDatabase;
use Wikimedia\Rdbms\LoadBalancer;
$this->getCacheKey( $blobAddress ),
$this->getCacheTTL(),
function ( $unused, &$ttl, &$setOpts ) use ( $blobAddress, $queryFlags ) {
- list( $index ) = DBAccessObjectUtils::getDBOptions( $queryFlags );
- $setOpts += Database::getCacheSetOptions( $this->getDBConnection( $index ) );
-
+ // Ignore $setOpts; blobs are immutable and negatives are not cached
return $this->fetchBlob( $blobAddress, $queryFlags );
},
[ 'pcGroup' => self::TEXT_CACHE_GROUP, 'pcTTL' => IExpiringStore::TTL_PROC_LONG ]
$this->getCacheKey( $cacheKey ),
$this->getCacheTTL(),
function () use ( $url, $flags ) {
- // No negative caching per BlobStore::getBlob()
+ // Ignore $setOpts; blobs are immutable and negatives are not cached
$blob = ExternalStore::fetchFromURL( $url, [ 'wiki' => $this->wikiId ] );
return $blob === false ? false : $this->decompressData( $blob, $flags );
* @since 1.25
*/
public static function parseAndStash( WikiPage $page, Content $content, User $user, $summary ) {
- $cache = ObjectCache::getLocalClusterInstance();
$logger = LoggerFactory::getInstance( 'StashEdit' );
$title = $page->getTitle();
$cutoffTime = time() - self::PRESUME_FRESH_TTL_SEC;
	// Reuse any freshly built matching edit stash cache
- $editInfo = $cache->get( $key );
+ $editInfo = self::getStashValue( $key );
if ( $editInfo && wfTimestamp( TS_UNIX, $editInfo->timestamp ) >= $cutoffTime ) {
$alreadyCached = true;
} else {
return self::ERROR_NONE;
}
- list( $stashInfo, $ttl, $code ) = self::buildStashValue(
+ $code = self::storeStashValue(
+ $key,
$editInfo->pstContent,
$editInfo->output,
$editInfo->timestamp,
$user
);
- if ( $stashInfo ) {
- $ok = $cache->set( $key, $stashInfo, $ttl );
- if ( $ok ) {
- $logger->debug( "Cached parser output for key '{cachekey}' ('{title}').",
- [ 'cachekey' => $key, 'title' => $titleStr ] );
- return self::ERROR_NONE;
- } else {
- $logger->error( "Failed to cache parser output for key '{cachekey}' ('{title}').",
- [ 'cachekey' => $key, 'title' => $titleStr ] );
- return self::ERROR_CACHE;
- }
- } else {
- // @todo Doesn't seem reachable, see @todo in buildStashValue
- $logger->info( "Uncacheable parser output for key '{cachekey}' ('{title}') [{code}].",
+ if ( $code === true ) {
+ $logger->debug( "Cached parser output for key '{cachekey}' ('{title}').",
+ [ 'cachekey' => $key, 'title' => $titleStr ] );
+ return self::ERROR_NONE;
+ } elseif ( $code === 'uncacheable' ) {
+ $logger->info(
+ "Uncacheable parser output for key '{cachekey}' ('{title}') [{code}].",
[ 'cachekey' => $key, 'title' => $titleStr, 'code' => $code ] );
return self::ERROR_UNCACHEABLE;
+ } else {
+ $logger->error( "Failed to cache parser output for key '{cachekey}' ('{title}').",
+ [ 'cachekey' => $key, 'title' => $titleStr, 'code' => $code ] );
+ return self::ERROR_CACHE;
}
}
return false; // bots never stash - don't pollute stats
}
- $cache = ObjectCache::getLocalClusterInstance();
$logger = LoggerFactory::getInstance( 'StashEdit' );
$stats = MediaWikiServices::getInstance()->getStatsdDataFactory();
$key = self::getStashKey( $title, self::getContentHash( $content ), $user );
- $editInfo = $cache->get( $key );
+ $editInfo = self::getStashValue( $key );
if ( !is_object( $editInfo ) ) {
$start = microtime( true );
// We ignore user aborts and keep parsing. Block on any prior parsing
$lb = MediaWikiServices::getInstance()->getDBLoadBalancer();
$dbw = $lb->getAnyOpenConnection( $lb->getWriterIndex() );
if ( $dbw && $dbw->lock( $key, __METHOD__, 30 ) ) {
- $editInfo = $cache->get( $key );
+ $editInfo = self::getStashValue( $key );
$dbw->unlock( $key, __METHOD__ );
}
*/
private static function getStashKey( Title $title, $contentHash, User $user ) {
return ObjectCache::getLocalClusterInstance()->makeKey(
- 'prepared-edit',
+ 'stashed-edit-info',
md5( $title->getPrefixedDBkey() ),
// Account for the edit model/text
$contentHash,
);
}
+ /**
+ * @param string $uuid
+ * @return string
+ */
+ private static function getStashParserOutputKey( $uuid ) {
+ return ObjectCache::getLocalClusterInstance()->makeKey( 'stashed-edit-output', $uuid );
+ }
+
+ /**
+ * @param string $key
+ * @return stdClass|bool Object map (pstContent,output,outputID,timestamp,edits) or false
+ */
+ private static function getStashValue( $key ) {
+ $cache = ObjectCache::getLocalClusterInstance();
+
+ $stashInfo = $cache->get( $key );
+ if ( !is_object( $stashInfo ) ) {
+ return false;
+ }
+
+ $parserOutputKey = self::getStashParserOutputKey( $stashInfo->outputID );
+ $parserOutput = $cache->get( $parserOutputKey );
+ if ( $parserOutput instanceof ParserOutput ) {
+ $stashInfo->output = $parserOutput;
+
+ return $stashInfo;
+ }
+
+ return false;
+ }
+
/**
* Build a value to store in memcached based on the PST content and parser output
*
* This makes a simple version of WikiPage::prepareContentForEdit() as stash info
*
+ * @param string $key
* @param Content $pstContent Pre-Save transformed content
* @param ParserOutput $parserOutput
* @param string $timestamp TS_MW
* @param User $user
- * @return array (stash info array, TTL in seconds, info code) or (null, 0, info code)
+ * @return string|bool True or an error code
*/
- private static function buildStashValue(
- Content $pstContent, ParserOutput $parserOutput, $timestamp, User $user
+ private static function storeStashValue(
+ $key, Content $pstContent, ParserOutput $parserOutput, $timestamp, User $user
) {
// If an item is renewed, mind the cache TTL determined by config and parser functions.
// Put an upper limit on the TTL for sanity to avoid extreme template/file staleness.
- $since = time() - wfTimestamp( TS_UNIX, $parserOutput->getCacheTime() );
- $ttl = min( $parserOutput->getCacheExpiry() - $since, self::MAX_CACHE_TTL );
-
+ $age = time() - wfTimestamp( TS_UNIX, $parserOutput->getCacheTime() );
+ $ttl = min( $parserOutput->getCacheExpiry() - $age, self::MAX_CACHE_TTL );
// Avoid extremely stale user signature timestamps (T84843)
if ( $parserOutput->getFlag( 'user-signature' ) ) {
$ttl = min( $ttl, self::MAX_SIGNATURE_TTL );
}
if ( $ttl <= 0 ) {
- // @todo It doesn't seem like this can occur, because it would mean an entry older than
- // getCacheExpiry() seconds, which is much longer than PRESUME_FRESH_TTL_SEC, and
- // anything older than PRESUME_FRESH_TTL_SEC will have been thrown out already.
- return [ null, 0, 'no_ttl' ];
+ return 'uncacheable'; // low TTL due to a tag, magic word, or signature?
}
- // Only store what is actually needed
+ // Store what is actually needed and split the output into another key (T204742)
+ $parseroutputID = md5( $key );
$stashInfo = (object)[
'pstContent' => $pstContent,
- 'output' => $parserOutput,
+ 'outputID' => $parseroutputID,
'timestamp' => $timestamp,
'edits' => $user->getEditCount()
];
- return [ $stashInfo, $ttl, 'ok' ];
+ $cache = ObjectCache::getLocalClusterInstance();
+ $ok = $cache->set( $key, $stashInfo, $ttl );
+ if ( $ok ) {
+ $ok = $cache->set(
+ self::getStashParserOutputKey( $parseroutputID ),
+ $parserOutput,
+ $ttl
+ );
+ }
+
+ return $ok ? true : 'store_error';
}
public function getAllowedParams() {
return $value;
}
- public function delete( $key ) {
+ public function delete( $key, $flags = 0 ) {
apc_delete( $key . self::KEY_SUFFIX );
return true;
return true;
}
- public function delete( $key ) {
+ public function delete( $key, $flags = 0 ) {
apcu_delete( $key . self::KEY_SUFFIX );
return true;
*
 * @param string $key
+	 * @param int $flags Bitfield of BagOStuff::WRITE_* constants
 * @return bool True if the item was deleted or not found, false on failure
 */
- abstract public function delete( $key );
+ abstract public function delete( $key, $flags = 0 );
/**
* Merge changes into the existing cache value (possibly creating a new one)
* The callbacks may or may not be called ever, in any particular order.
 * They are likely to be invoked when something WRITE_SYNC is used.
* They should follow a caching pattern as shown below, so that any code
- * using the word will get it's result no matter what happens.
+	 * using the work will get its result no matter what happens.
* @code
* $result = null;
* $workCallback = function () use ( &$result ) {
}
public function delete( $key, $flags = 0 ) {
- parent::delete( $key );
+ parent::delete( $key, $flags );
if ( !( $flags & self::WRITE_CACHE_ONLY ) ) {
$this->backend->delete( $key );
}
return true;
}
- public function delete( $key ) {
+ public function delete( $key, $flags = 0 ) {
return true;
}
return true;
}
- public function delete( $key ) {
+ public function delete( $key, $flags = 0 ) {
unset( $this->bag[$key] );
return true;
$value, $this->fixExpiry( $exptime ) );
}
- public function delete( $key ) {
+ public function delete( $key, $flags = 0 ) {
return $this->client->delete( $this->validateKeyEncoding( $key ) );
}
return $this->checkResult( $key, parent::cas( $casToken, $key, $value, $exptime ) );
}
- public function delete( $key ) {
+ public function delete( $key, $flags = 0 ) {
$this->debugLog( "delete($key)" );
$result = parent::delete( $key );
if ( $result === false && $this->client->getResultCode() === Memcached::RES_NOTFOUND ) {
return $this->doWrite( $this->cacheIndexes, $asyncWrites, 'set', $key, $value, $exptime );
}
- public function delete( $key ) {
+ public function delete( $key, $flags = 0 ) {
return $this->doWrite( $this->cacheIndexes, $this->asyncWrites, 'delete', $key );
}
return false;
}
- /**
- * Set an item
- *
- * @param string $key
- * @param mixed $value
- * @param int $exptime Either an interval in seconds or a unix timestamp for expiry
- * @param int $flags Bitfield of BagOStuff::WRITE_* constants
- * @return bool Success
- */
public function set( $key, $value, $exptime = 0, $flags = 0 ) {
+ // @TODO: respect WRITE_SYNC (e.g. EACH_QUORUM)
$req = [
'method' => 'PUT',
'url' => $this->url . rawurlencode( $key ),
return $this->handleError( "Failed to store $key", $rcode, $rerr );
}
- /**
- * Delete an item.
- *
- * @param string $key
- * @return bool True if the item was deleted or not found, false on failure
- */
- public function delete( $key ) {
+ public function delete( $key, $flags = 0 ) {
+ // @TODO: respect WRITE_SYNC (e.g. EACH_QUORUM)
$req = [
'method' => 'DELETE',
'url' => $this->url . rawurlencode( $key ),
return $result;
}
- public function delete( $key ) {
+ public function delete( $key, $flags = 0 ) {
list( $server, $conn ) = $this->getConnection( $key );
if ( !$conn ) {
return false;
return $this->writeStore->set( $key, $value, $exptime, $flags );
}
- public function delete( $key ) {
- return $this->writeStore->delete( $key );
+ public function delete( $key, $flags = 0 ) {
+ return $this->writeStore->delete( $key, $flags );
}
public function add( $key, $value, $exptime = 0 ) {
return ( is_array( $result ) && $result === [] ) || $result;
}
- public function delete( $key ) {
+ public function delete( $key, $flags = 0 ) {
wincache_ucache_delete( $key );
return true;
namespace Wikimedia\Rdbms;
/**
- * Exception class for replica DB wait timeouts
- * @deprecated since 1.32
+ * Exception class for replica DB wait errors
* @ingroup Database
*/
class DBReplicationWaitError extends DBExpectedError {
return (bool)$db->affectedRows();
}
- public function delete( $key ) {
+ public function delete( $key, $flags = 0 ) {
+ $ok = true;
+
list( $serverIndex, $tableName ) = $this->getTableByKey( $key );
$db = null;
$silenceScope = $this->silenceTransactionProfiler();
__METHOD__ );
} catch ( DBError $e ) {
$this->handleWriteError( $e, $db, $serverIndex );
- return false;
+ $ok = false;
+ }
+ if ( ( $flags & self::WRITE_SYNC ) == self::WRITE_SYNC ) {
+ $ok = $this->waitForReplication() && $ok;
}
- return true;
+ return $ok;
}
public function incr( $key, $step = 1 ) {
if ( $exception instanceof DBConnectionError ) {
$this->markServerDown( $exception, $serverIndex );
}
- $this->logger->error( "DBError: {$exception->getMessage()}" );
- if ( $exception instanceof DBConnectionError ) {
- $this->setLastError( BagOStuff::ERR_UNREACHABLE );
- $this->logger->debug( __METHOD__ . ": ignoring connection error" );
- } else {
- $this->setLastError( BagOStuff::ERR_UNEXPECTED );
- $this->logger->debug( __METHOD__ . ": ignoring query error" );
- }
+
+ $this->setAndLogDBError( $exception );
}
/**
protected function handleWriteError( DBError $exception, IDatabase $db = null, $serverIndex ) {
if ( !$db ) {
$this->markServerDown( $exception, $serverIndex );
- } elseif ( $db->wasReadOnlyError() ) {
- if ( $db->trxLevel() && $this->usesMainDB() ) {
- // Errors like deadlocks and connection drops already cause rollback.
- // For consistency, we have no choice but to throw an error and trigger
- // complete rollback if the main DB is also being used as the cache DB.
- throw $exception;
- }
}
+ $this->setAndLogDBError( $exception );
+ }
+
+ /**
+ * @param DBError $exception
+ */
+ private function setAndLogDBError( DBError $exception ) {
$this->logger->error( "DBError: {$exception->getMessage()}" );
if ( $exception instanceof DBConnectionError ) {
$this->setLastError( BagOStuff::ERR_UNREACHABLE );
}
// Main LB is used; wait for any replica DBs to catch up
- $masterPos = $lb->getMasterPos();
- if ( !$masterPos ) {
- return true; // not applicable
- }
+ try {
+ $masterPos = $lb->getMasterPos();
+ if ( !$masterPos ) {
+ return true; // not applicable
+ }
- $loop = new WaitConditionLoop(
- function () use ( $lb, $masterPos ) {
- return $lb->waitForAll( $masterPos, 1 );
- },
- $this->syncTimeout,
- $this->busyCallbacks
- );
+ $loop = new WaitConditionLoop(
+ function () use ( $lb, $masterPos ) {
+ return $lb->waitForAll( $masterPos, 1 );
+ },
+ $this->syncTimeout,
+ $this->busyCallbacks
+ );
- return ( $loop->invoke() === $loop::CONDITION_REACHED );
+ return ( $loop->invoke() === $loop::CONDITION_REACHED );
+ } catch ( DBError $e ) {
+ $this->setAndLogDBError( $e );
+
+ return false;
+ }
}
/**
* @ingroup Pager
*/
abstract class IndexPager extends ContextSource implements Pager {
- /**
- * Constants for the $mDefaultDirection field.
- *
- * These are boolean for historical reasons and should stay boolean for backwards-compatibility.
- */
+ /** Backwards-compatible constant for $mDefaultDirection field (do not change) */
const DIR_ASCENDING = false;
+ /** Backwards-compatible constant for $mDefaultDirection field (do not change) */
const DIR_DESCENDING = true;
+ /** Backwards-compatible constant for reallyDoQuery() (do not change) */
+ const QUERY_ASCENDING = true;
+ /** Backwards-compatible constant for reallyDoQuery() (do not change) */
+ const QUERY_DESCENDING = false;
+
/** @var WebRequest */
public $mRequest;
/** @var int[] List of default entry limit options to be presented to clients */
public $mLimitsShown = [ 20, 50, 100, 250, 500 ];
	/** @var int The default entry limit chosen for clients */
public $mDefaultLimit = 50;
- /** @var string|int The starting point to enumerate entries */
+ /** @var mixed The starting point to enumerate entries */
public $mOffset;
/** @var int The maximum number of entries to show */
public $mLimit;
public $mQueryDone = false;
/** @var IDatabase */
public $mDb;
- /** @var stdClass|null Extra row fetched at the end to see if the end was reached */
+ /** @var stdClass|bool|null Extra row fetched at the end to see if the end was reached */
public $mPastTheEndRow;
/**
* The index to actually be used for ordering. This is a single column,
* for one ordering, even if multiple orderings are supported.
+ * @var string
*/
protected $mIndexField;
/**
* An array of secondary columns to order by. These fields are not part of the offset.
* This is a column list for one ordering, even if multiple orderings are supported.
+ * @var string[]
*/
protected $mExtraSortFields;
/** For pages that support multiple types of ordering, which one to use.
+ * @var string|null
*/
protected $mOrderType;
/**
*
* Like $mIndexField, $mDefaultDirection will be a single value even if the
* class supports multiple default directions for different order types.
+ * @var bool
*/
public $mDefaultDirection;
+ /** @var bool */
public $mIsBackwards;
- /** True if the current result set is the first one */
+ /** @var bool True if the current result set is the first one */
public $mIsFirst;
+ /** @var bool */
public $mIsLast;
- protected $mLastShown, $mFirstShown, $mPastTheEndIndex, $mDefaultQuery, $mNavigationBar;
+ /** @var mixed */
+ protected $mLastShown;
+ /** @var mixed */
+ protected $mFirstShown;
+ /** @var mixed */
+ protected $mPastTheEndIndex;
+ /** @var array */
+ protected $mDefaultQuery;
+ /** @var string */
+ protected $mNavigationBar;
/**
* Whether to include the offset in the query
+ * @var bool
*/
protected $mIncludeOffset = false;
public function doQuery() {
# Use the child class name for profiling
$fname = __METHOD__ . ' (' . static::class . ')';
+ /** @noinspection PhpUnusedLocalVariableInspection */
$section = Profiler::instance()->scopedProfileIn( $fname );
- $descending = $this->mIsBackwards
- ? ( $this->mDefaultDirection === self::DIR_DESCENDING )
- : ( $this->mDefaultDirection === self::DIR_ASCENDING );
+ $defaultOrder = ( $this->mDefaultDirection === self::DIR_ASCENDING )
+ ? self::QUERY_ASCENDING
+ : self::QUERY_DESCENDING;
+ $order = $this->mIsBackwards ? self::oppositeOrder( $defaultOrder ) : $defaultOrder;
# Plus an extra row so that we can tell the "next" link should be shown
$queryLimit = $this->mLimit + 1;
// direction see if we get a row.
$oldIncludeOffset = $this->mIncludeOffset;
$this->mIncludeOffset = !$this->mIncludeOffset;
- $isFirst = !$this->reallyDoQuery( $this->mOffset, 1, !$descending )->numRows();
+ $oppositeOrder = self::oppositeOrder( $order );
+ $isFirst = !$this->reallyDoQuery( $this->mOffset, 1, $oppositeOrder )->numRows();
$this->mIncludeOffset = $oldIncludeOffset;
}
$this->mResult = $this->reallyDoQuery(
$this->mOffset,
$queryLimit,
- $descending
+ $order
);
$this->extractResultInfo( $isFirst, $queryLimit, $this->mResult );
$this->mResult->rewind(); // Paranoia
}
+ /**
+ * @param bool $order One of the IndexPager::QUERY_* class constants
+ * @return bool The opposite query order as an IndexPager::QUERY_ constant
+ */
+ final protected static function oppositeOrder( $order ) {
+ return ( $order === self::QUERY_ASCENDING )
+ ? self::QUERY_DESCENDING
+ : self::QUERY_ASCENDING;
+ }
+
/**
* @return IResultWrapper The result wrapper.
*/
}
/**
- * Do a query with specified parameters, rather than using the object
- * context
+ * Do a query with specified parameters, rather than using the object context
+ *
+ * @note For b/c, query direction is true for ascending and false for descending
*
* @param string $offset Index offset, inclusive
* @param int $limit Exact query limit
- * @param bool $descending Query direction, false for ascending, true for descending
+ * @param bool $order IndexPager::QUERY_ASCENDING or IndexPager::QUERY_DESCENDING
* @return IResultWrapper
*/
- public function reallyDoQuery( $offset, $limit, $descending ) {
+ public function reallyDoQuery( $offset, $limit, $order ) {
list( $tables, $fields, $conds, $fname, $options, $join_conds ) =
- $this->buildQueryInfo( $offset, $limit, $descending );
+ $this->buildQueryInfo( $offset, $limit, $order );
return $this->mDb->select( $tables, $fields, $conds, $fname, $options, $join_conds );
}
/**
* Build variables to use by the database wrapper.
*
+ * @note For b/c, query direction is true for ascending and false for descending
+ *
* @param string $offset Index offset, inclusive
* @param int $limit Exact query limit
- * @param bool $descending Query direction, false for ascending, true for descending
+ * @param bool $order IndexPager::QUERY_ASCENDING or IndexPager::QUERY_DESCENDING
* @return array
*/
- protected function buildQueryInfo( $offset, $limit, $descending ) {
+ protected function buildQueryInfo( $offset, $limit, $order ) {
$fname = __METHOD__ . ' (' . $this->getSqlComment() . ')';
$info = $this->getQueryInfo();
$tables = $info['tables'];
$options = $info['options'] ?? [];
$join_conds = $info['join_conds'] ?? [];
$sortColumns = array_merge( [ $this->mIndexField ], $this->mExtraSortFields );
- if ( $descending ) {
+ if ( $order === self::QUERY_ASCENDING ) {
$options['ORDER BY'] = $sortColumns;
$operator = $this->mIncludeOffset ? '>=' : '>';
} else {
*/
abstract class RangeChronologicalPager extends ReverseChronologicalPager {
+ /** @var string[] */
protected $rangeConds = [];
/**
*
* @param string $offset Index offset, inclusive
* @param int $limit Exact query limit
- * @param bool $descending Query direction, false for ascending, true for descending
+ * @param bool $order IndexPager::QUERY_ASCENDING or IndexPager::QUERY_DESCENDING
* @return array
*/
- protected function buildQueryInfo( $offset, $limit, $descending ) {
+ protected function buildQueryInfo( $offset, $limit, $order ) {
list( $tables, $fields, $conds, $fname, $options, $join_conds ) = parent::buildQueryInfo(
$offset,
$limit,
- $descending
+ $order
);
if ( $this->rangeConds ) {
* @ingroup Pager
*/
abstract class ReverseChronologicalPager extends IndexPager {
+ /** @var bool */
public $mDefaultDirection = IndexPager::DIR_DESCENDING;
+ /** @var int */
public $mYear;
+ /** @var int */
public $mMonth;
+ /** @var int */
public $mDay;
public function getNavigationBar() {
* @ingroup Pager
*/
abstract class TablePager extends IndexPager {
+ /** @var string */
protected $mSort;
+ /** @var stdClass */
protected $mCurrentRow;
public function __construct( IContextSource $context = null ) {
];
}
- protected function buildQueryInfo( $offset, $limit, $descending ) {
+ protected function buildQueryInfo( $offset, $limit, $order ) {
$fname = __METHOD__ . ' (' . $this->getSqlComment() . ')';
$sortColumns = array_merge( [ $this->mIndexField ], $this->mExtraSortFields );
- if ( $descending ) {
+ if ( $order === self::QUERY_ASCENDING ) {
$dir = 'ASC';
$orderBy = $sortColumns;
$operator = $this->mIncludeOffset ? '>=' : '>';
}
/**
- * This function normally does a database query to get the results; we need
+ * This function normally does a database query to get the results; we need
* to make a pretend result using a FakeResultWrapper.
* @param string $offset
* @param int $limit
- * @param bool $descending
+	 * @param bool $order IndexPager::QUERY_ASCENDING or IndexPager::QUERY_DESCENDING
* @return FakeResultWrapper
*/
- function reallyDoQuery( $offset, $limit, $descending ) {
+ function reallyDoQuery( $offset, $limit, $order ) {
+ $asc = ( $order === self::QUERY_ASCENDING );
$result = new FakeResultWrapper( [] );
- $messageNames = $this->getAllMessages( $descending );
+ $messageNames = $this->getAllMessages( $order );
$statuses = self::getCustomisedStatuses( $messageNames, $this->langcode, $this->foreign );
$count = 0;
foreach ( $messageNames as $key ) {
$customised = isset( $statuses['pages'][$key] );
if ( $customised !== $this->custom &&
- ( $descending && ( $key < $offset || !$offset ) || !$descending && $key > $offset ) &&
+ ( $asc && ( $key < $offset || !$offset ) || !$asc && $key > $offset ) &&
( ( $this->prefix && preg_match( $this->prefix, $key ) ) || $this->prefix === false )
) {
$actual = wfMessage( $key )->inLanguage( $this->lang )->plain();
*
* @param string $offset Index offset, inclusive
* @param int $limit Exact query limit
- * @param bool $descending Query direction, false for ascending, true for descending
+ * @param bool $order IndexPager::QUERY_ASCENDING or IndexPager::QUERY_DESCENDING
* @return IResultWrapper
*/
- function reallyDoQuery( $offset, $limit, $descending ) {
+ function reallyDoQuery( $offset, $limit, $order ) {
list( $tables, $fields, $conds, $fname, $options, $join_conds ) = $this->buildQueryInfo(
$offset,
$limit,
- $descending
+ $order
);
/*
) ];
Hooks::run(
'ContribsPager::reallyDoQuery',
- [ &$data, $this, $offset, $limit, $descending ]
+ [ &$data, $this, $offset, $limit, $order ]
);
$result = [];
}
// sort results
- if ( $descending ) {
+ if ( $order === self::QUERY_ASCENDING ) {
ksort( $result );
} else {
krsort( $result );
*
* @param string $offset Index offset, inclusive
* @param int $limit Exact query limit
- * @param bool $descending Query direction, false for ascending, true for descending
+ * @param bool $order IndexPager::QUERY_ASCENDING or IndexPager::QUERY_DESCENDING
* @return IResultWrapper
*/
- function reallyDoQuery( $offset, $limit, $descending ) {
- $data = [ parent::reallyDoQuery( $offset, $limit, $descending ) ];
+ function reallyDoQuery( $offset, $limit, $order ) {
+ $data = [ parent::reallyDoQuery( $offset, $limit, $order ) ];
// This hook will allow extensions to add in additional queries, nearly
// identical to ContribsPager::reallyDoQuery.
Hooks::run(
'DeletedContribsPager::reallyDoQuery',
- [ &$data, $this, $offset, $limit, $descending ]
+ [ &$data, $this, $offset, $limit, $order ]
);
$result = [];
}
// sort results
- if ( $descending ) {
+ if ( $order === self::QUERY_ASCENDING ) {
ksort( $result );
} else {
krsort( $result );
* is descending, so I renamed it to $asc here.
* @param int $offset
* @param int $limit
- * @param bool $asc
- * @return array
+ * @param bool $order IndexPager::QUERY_ASCENDING or IndexPager::QUERY_DESCENDING
+ * @return FakeResultWrapper
* @throws MWException
*/
- function reallyDoQuery( $offset, $limit, $asc ) {
+ function reallyDoQuery( $offset, $limit, $order ) {
$prevTableName = $this->mTableName;
$this->mTableName = 'image';
list( $tables, $fields, $conds, $fname, $options, $join_conds ) =
- $this->buildQueryInfo( $offset, $limit, $asc );
+ $this->buildQueryInfo( $offset, $limit, $order );
$imageRes = $this->mDb->select( $tables, $fields, $conds, $fname, $options, $join_conds );
$this->mTableName = $prevTableName;
$this->mIndexField = 'oi_' . substr( $this->mIndexField, 4 );
list( $tables, $fields, $conds, $fname, $options, $join_conds ) =
- $this->buildQueryInfo( $offset, $limit, $asc );
+ $this->buildQueryInfo( $offset, $limit, $order );
$oldimageRes = $this->mDb->select( $tables, $fields, $conds, $fname, $options, $join_conds );
$this->mTableName = $prevTableName;
$this->mIndexField = $oldIndex;
- return $this->combineResult( $imageRes, $oldimageRes, $limit, $asc );
+ return $this->combineResult( $imageRes, $oldimageRes, $limit, $order );
}
/**
$cache = ObjectCache::getLocalClusterInstance();
$editInfo = $cache->get( $key );
+ $outputKey = $cache->makeKey( 'stashed-edit-output', $editInfo->outputID );
+ $editInfo->output = $cache->get( $outputKey );
$editInfo->output->setCacheTime( wfTimestamp( TS_MW,
wfTimestamp( TS_UNIX, $editInfo->output->getCacheTime() ) - $howOld - 1 ) );
MWTimestamp::setFakeTime( function () use ( &$clock ) {
return $clock += 1000;
} );
- $user = $this->getTestUser()->getUser();
- $firstRevision = self::makeEdit( $user, 'Help:UserTest_GetEditTimestamp', 'one', 'test' );
- $secondRevision = self::makeEdit( $user, 'Help:UserTest_GetEditTimestamp', 'two', 'test' );
- // Sanity check: revisions timestamp are different
- $this->assertNotEquals( $firstRevision->getTimestamp(), $secondRevision->getTimestamp() );
-
- $this->assertEquals( $firstRevision->getTimestamp(), $user->getFirstEditTimestamp() );
- $this->assertEquals( $secondRevision->getTimestamp(), $user->getLatestEditTimestamp() );
+ try {
+ $user = $this->getTestUser()->getUser();
+ $firstRevision = self::makeEdit( $user, 'Help:UserTest_GetEditTimestamp', 'one', 'test' );
+ $secondRevision = self::makeEdit( $user, 'Help:UserTest_GetEditTimestamp', 'two', 'test' );
+ // Sanity check: revisions timestamp are different
+ $this->assertNotEquals( $firstRevision->getTimestamp(), $secondRevision->getTimestamp() );
+
+ $this->assertEquals( $firstRevision->getTimestamp(), $user->getFirstEditTimestamp() );
+ $this->assertEquals( $secondRevision->getTimestamp(), $user->getLatestEditTimestamp() );
+ } finally {
+ MWTimestamp::setFakeTime( false );
+ }
}
/**